\input basic
\jpar 1000
\varunit 1truein
\parskip .15in plus 5pt \baselineskip 15pt \lineskip 1pt
\def\rm{\:a} \def\sl{\:n} \def\bf{\:q} \def\it{\:?}
\font m=cmsc10 % Small caps
\def\sc{\:m} % Small Caps (10pt)
\font p=cms8
\font s=cmb8
\font c=cmr8
\font N=cmdunh % Titles
\font M=cmb12 % Titles
\font <=cmb10 % Bolder
\font t=cmtt9[tex,sys]
%\output{\advcount0\baselineskip 18pt\page}
\def\sect#1.{\vskip 20pt plus20pt minus 7pt\penalty -100
\secta#1.}
\def\secta#1.{\noindent{\bf \count1.\quad #1}\par\penalty 1000\advcount1\par
\vskip 5pt plus4pt minus 3pt\penalty 1000}
\def\eightpoint{\baselineskip9.5pt
\def\rm{\:c} \def\sl{\:p} \def\bf{\:s}
\rm}
\setcount1 1
\def \initsect{\setcount1 1\baselineskip 15pt}
\setcount0 1
\setcount2 0
\def\footnote#1{\advcount2\ofootnote{\count2}{#1}}
\def\ofootnote#1#2{{\eightpoint$↑{\hbox{#1}}$}\botinsert{\vskip 12pt
plus 10pt minus10pt \baselineskip 10pt\hbox par 6.5truein{\eightpoint
\raise 4pt\hbox{#1}\hskip 2pt #2}}}
\def\ssect#1.{\yyskip{\bf\penalty -50\hbox{\vbox{\hbox{#1}}}
\penalty 800}}
\def \ni{\noindent}
\def\textindent#1{\noindent\hbox to 19pt{\hskip 0pt plus 1000pt minus 1000pt#1\
\!}}
\def\hang{\hangindent 19pt}
\def\numitem#1{\yskip\hang\textindent{#1}}
\def\ref:#1.#2.{[\hbox{#1 #2}]}
\def \bcode {\par\vfil\penalty400\vfilneg\parskip 0pt plus 1 pt\vskip .25 in\: t\beginpassasis}
\def \ecode {\endpassasis\: a\par\vskip .05 in\vfil\penalty500\vfilneg\parskip .1 in plus 5pt}
\input pai[can,rpg]
\def\eat#1\endeat{} % macro for extended comments
\def\makeref:#1.#2.#3{\par{\noindent \parshape 2 0truein 6.5truein
.5truein 6.0truein \hbox{[\hbox{#1 #2}]}\linebreak
{#3}}\par\parshape 0\vfil\penalty 0\vfilneg}
\def\article#1{{\sl #1}}
\def\book#1{{\:< #1}}
\def\yyskip{\penalty-100\vskip8pt plus6pt minus4pt}
\def\yskip{\penalty-50\vskip 3pt plus 3pt minus 2pt}
\def\bslskp{\baselineskip 15pt}
\def\dispskip{\vskip12pt plus 10pt minus 5pt}
\def\bite{\noindent $\bullet$\ }
\def\Hpar#1{\par\parshape 2 .2truein 6.3truein .5truein 6.0truein #1\par\parshape 0}
\def\HHpar#1{\par\parshape 2 0truein 6.0truein .5truein 5.5truein #1\par\parshape 0}
\def\BBpar#1{\numitem{$\bullet$}#1}
\def\lead{\leaders\hbox to 10pt{\hfill.\hfill}\hfill}
\def\contents#1#2{\hbox to size{#1\lead #2}}
\output{\baselineskip 18pt\page}
\:M
\baselineskip 18pt
\ctrline{Report to}
\ctrline{The Defense Advanced Research Projects Agency}
\ctrline{for}
\vskip .5truein
\ctrline{BASIC RESEARCH}
\vskip .2truein
\ctrline{IN}
\vskip .2truein
\ctrline{ARTIFICIAL INTELLIGENCE}
\vskip .2truein
\ctrline{AND}
\vskip .2truein
\ctrline{FORMAL REASONING}
\ctrline{John McCarthy}
\ctrline{Professor of Computer Science}
\vfill
\ctrline{July 1983}
\:M
\ctrline{Department of Computer Science}
\ctrline{School of Humanities and Sciences}
\ctrline{Stanford University}
\vskip 1.75truein
\ni\it{This research was supported by the Advanced Research Projects Agency of
the Department of Defense under ARPA Order No. 2494, Contract MDA903-80-C-0102.}
\eject
\parskip .15in plus 5pt \baselineskip 15pt \lineskip 1pt
\output{\baselineskip 18pt\page\ctrline{\curfont a\count0}\advcount0}
\rm
\initsect
\sect Introduction.
\save1\hbox{\count0}
Applied research requires basic research to replenish the stock of ideas
on which its progress depends. The long-range goal of work in basic AI
and formal reasoning is to make computers carry out the reasoning
required to solve problems. Our work over the past few years has made it
considerably clearer how our formal approach can be applied to problems in
AI and in the mathematical theory of computation (MTC). This brings
application nearer, especially in MTC.
The rest of this section will discuss the recent research of the people
in the Formal Reasoning Group and their long-term research interests and
directions.
\sect Formal Reasoning.
\ssect Accomplishments.
John McCarthy continued his work in formalization
of the commonsense facts about the world needed for intelligent
behavior and also work in formalization of computer programs.
McCarthy's personal research concerns the formalization of commonsense
knowledge and commonsense reasoning in mathematical/logical languages.
Such formalization is required to represent the information in the memory
of a computer and to make programs that can reason with the information in
order to solve problems. It is now widely agreed (for example, by
Newell, Nilsson, and Minsky) that progress in artificial intelligence is
limited by our ability to express commonsense knowledge and reasoning.
There isn't widespread agreement on how to do this, and McCarthy's work
explores one end of the spectrum of ideas---using mathematical logic.
McCarthy has made substantial progress on three problems:
\ni 1. Non-monotonic reasoning.
In 1980 McCarthy published ``Circumscription: a method
of non-monotonic reasoning.'' Since that time circumscription
has been further developed. A forthcoming paper will describe
a new formalization and many new applications.
There seem to be many kinds of non-monotonic reasoning formalizable by
circumscription, and it isn't clear how to unify them. It also isn't
clear whether circumscription is a universal form of non-monotonic
reasoning or whether other forms will be required. Circumscription is
known to subsume many examples of non-monotonic reasoning given earlier by
McDermott and Doyle, and by Reiter; and there are no outstanding cases
where it is not known how to express the reasoning by circumscription.
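
For reference, circumscribing a predicate $P$ in an axiom $A(P)$, in the
sense of \ref:McCarthy.1980., amounts to adjoining the schema
$$A(Q)\ ∧\ ∀x.(Q(x) ⊃ P(x))\ ⊃\ ∀x.(P(x) ⊃ Q(x)),$$
where $Q$ ranges over predicate expressions; the schema says that $P$ is a
minimal predicate satisfying $A$.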
Here are some example uses of circumscription.
\numitem{a.}As a communication convention. Suppose A tells B about
a situation involving a bird. If the bird may not be able to fly, and this
is relevant to solving the problem, then A should mention
the relevant information; if the bird can fly, however, there is
no requirement to mention the fact.
\numitem{b.}As a database or information storage convention. It can be a
convention of a particular database that certain predicates have their
minimal extension. This generalizes the closed-world assumption of
Reiter. When a database makes the closed-world assumption for all
predicates, it is reasonable to embed this fact in the programs that use
the database. However, when there is a priority structure among the
predicates, we need to express the priorities as sentences of the
database, perhaps included in a preamble to it.
\numitem{}Neither a nor b requires that most birds can fly. Should it happen in
situations a and b above that most birds cannot fly, the convention may
lead to inefficiency but not incorrectness.
\numitem{c.} As a rule of conjecture. This use was emphasized in
\ref:McCarthy.1980.. The circumscriptions may be regarded as expressions
of some probabilistic notions such as `most birds can fly' or they may be
expressions of simple cases. Thus it is simple to conjecture that there
are no relevant present material objects other than those whose presence
can be inferred. It is also a simple conjecture that a tool asserted to
be present is usable for its normal function. Such conjectures sometimes
conflict, but there is nothing wrong with having incompatible conjectures
on hand. Besides the possibility of deciding that one is correct and the
other wrong, it is possible to use one for generating possible exceptions
to the other.
\numitem{d.}As a representation of a policy. The example is Doyle's ``The meeting
will be on Wednesday unless another decision is explicitly made.''
\numitem{e.}As a very streamlined expression of probabilistic information when
numerical probabilities, especially conditional probabilities, are
unobtainable. Since circumscription doesn't provide numerical
probabilities, its probabilistic interpretation involves probabilities that
are either infinitesimal, within an infinitesimal of one, or
intermediate---without any discrimination among the intermediate values.
Circumscriptions give conditional probabilities. Thus we may treat
the probability that a bird cannot fly as an infinitesimal. However, if
the rare event occurs that the bird is a penguin, then the conditional
probability that it can fly is infinitesimal, but we may hear of some rare
condition that would allow it to fly after all.
\numitem{}Why don't we use finite probabilities combined by the usual laws? That
would be fine if we had the numbers, but circumscription is usable when we
can't get the numbers or find their use inconvenient. Note that the
general probability that a bird can fly may be irrelevant, because we are
interested in particular situations which weigh in favor or against a
particular bird flying.
\numitem{}Notice that circumscription does not provide for weighing evidence; it is
appropriate when the information permits snap decisions. However, many
cases nominally treated in terms of weighing information are in fact cases
in which the weights are such that circumscription and other defaults work
better.
\ni 2. Formalization of knowledge about knowledge.
The importance of representing knowledge about knowledge is discussed
elsewhere in this proposal and our previous proposals. In particular,
the proposed `intelligence analyst' requires knowledge about
knowledge.
McCarthy developed a first-order language that admits Kripke-style
possible worlds as objects and uses the Kripke accessibility relation to
formalize the knowledge that a person has of the value of an expression.
McCarthy showed how to use this formalism to express the facts of a
difficult mathematical puzzle involving knowledge---the following puzzle
of `Mr. S and Mr. P.'
Two numbers are chosen between 2 and 99 inclusive. Mr. P
is told the product and Mr. S is told the sum. The following
dialog occurs:
\ni Mr. P: I don't know the numbers.
\ni Mr. S: I knew you didn't know them. I don't know them either.
\ni Mr. P: Now I know the numbers.
\ni Mr. S: Now I know the numbers.
\ni Problem: What are the numbers?
\ni Answer: The numbers are 4 and 13.
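
Although McCarthy's formalization is axiomatic rather than computational,
the content of the dialog can be conveyed by a brute-force search over the
pairs in the stated range. The following Lisp sketch (ours, with
hypothetical names, and not part of the formal work described here) simply
translates each statement of the dialog into a test on the true pair;
running \hbox{(solve)} should recover the answer given above.
\bcode
(defparameter *pairs*
  (loop for a from 2 to 99
        append (loop for b from a to 99 collect (cons a b))))

(defun group-by (key-fn)               ; index the pairs by sum or product
  (let ((table (make-hash-table)))
    (dolist (p *pairs* table)
      (push p (gethash (funcall key-fn p) table)))))

(defparameter *by-sum*     (group-by (lambda (p) (+ (car p) (cdr p)))))
(defparameter *by-product* (group-by (lambda (p) (* (car p) (cdr p)))))

(defun sum-splits (p)     (gethash (+ (car p) (cdr p)) *by-sum*))
(defun product-splits (p) (gethash (* (car p) (cdr p)) *by-product*))

;; The statements of the dialog, read as properties of the true pair.
(defun p-doesnt-know (p)  (> (length (product-splits p)) 1))
(defun s-doesnt-know (p)  (> (length (sum-splits p)) 1))
(defun s-knew-p-didnt (p)
  (every (function p-doesnt-know) (sum-splits p)))
(defun p-now-knows (p)                 ; one product-split fits Mr. S's statement
  (= 1 (count-if (lambda (q) (and (s-doesnt-know q) (s-knew-p-didnt q)))
                 (product-splits p))))
(defun s-now-knows (p)                 ; one sum-split fits Mr. P's second statement
  (= 1 (count-if (function p-now-knows) (sum-splits p))))

(defun solve ()
  (remove-if-not (lambda (p)
                   (and (p-doesnt-know p) (s-doesnt-know p)
                        (s-knew-p-didnt p) (p-now-knows p) (s-now-knows p)))
                 *pairs*))
\ecode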
The key difficulty in formalizing `Mr. S and Mr. P' comes from
the need to represent ignorance. How do we represent the facts that
all Mr. S initially knows about the numbers is given by his knowledge
of the sum and that all he knows about Mr. P's knowledge is that Mr.
P knows the product? Representation of this ignorance is accomplished
by asserting the existence of a sufficiently comprehensive collection
of possible worlds accessible to Mr. S. Using the possible worlds
in the language itself rather than regarding them as metalinguistic
devices has been uncongenial to logicians. However, none of the experts on
modal logic to whom the problem has been submitted were able to
formulate it directly in modal logic.
This example problem attracted our attention to an issue that is
fundamental to all programs that reason about knowledge---the
representation of ignorance. Thus the advice-taking ANALYST (discussed
below) program may conjecture that all the Russians should know about a
certain radar is what can be deduced from its appearance, where it is
used, and the articles in Aviation Week. We can express this by saying
that any plausible conjecture compatible with this information is a
possibility for them. If they behave so as to rule out such a
possibility, then we conjecture that they have an additional source of
information. The explicitly Kripkean formalization developed in
connection with `Mr. S and Mr. P' should work here as well.
McCarthy's formalization of `Mr. S and Mr. P' was modified
by Ma Xiwen, a Chinese visitor, and used for an FOL proof that
transformed the problem into a purely arithmetic problem. Unfortunately,
McCarthy found an error in Ma's axioms, and it isn't clear that
when the error is fixed, the proof still works. Also the axioms
describing the effect of learning---what happens when Mr. P
learns that Mr. S knew he didn't know the numbers---seem to have
some defects. Formalizing the effect of learning that other people
know something (or do not) is required for making the ANALYST program
work and in any program with similar capabilities.
\ni 3. Introspection in logic programming. In the early 1970's Alain
Colmerauer of Marseilles University in France and Robert Kowalski of
Imperial College in London proposed the use of first order logic as a
programming language, and the Marseilles group subsequently embodied these
ideas in a programming language called Prolog. Prolog has become popular
in Europe for artificial intelligence use, and the Japanese `Fifth
Generation' project proposes to use logic programming. Of people who have
used both Lisp and Prolog, some prefer one and some the other. Alan
Robinson and others have tried to develop languages that allow both styles
of programming. McCarthy has tried Prolog and still prefers Lisp.
However, certain issues of programming style come up more clearly in
Prolog than in Lisp. One of these can be called `introspection,' wherein
a program uses itself and its memory state as data. This is similar to
`reflection' as studied by Brian Smith in his well-known MIT PhD
thesis.
Kowalski proposed a slogan
``$\hbox{Algorithm}=\hbox{logic}+\hbox{control}$,'' and proposed that
Prolog permitted a neat separation. McCarthy has investigated this claim
using a map-coloring logic program devised by Luis Pereira and Antonio
Porto of the University of Lisbon in Portugal. He noted that two
efficient algorithms for map coloring could be regarded as having the same
logic as the original Pereira-Porto logic program but more sophisticated
control.
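
To make the separation concrete, the kind of logic involved can be sketched
as follows (our own Lisp rendering, not the Pereira-Porto Prolog text):
assign one of four colors to each region so that adjacent regions receive
different colors. Naive chronological backtracking, as in the sketch, is
one control for this logic; the postponement and Kempe heuristics discussed
next are alternative controls over the same logic.
\bcode
(defparameter *colors* '(red yellow blue green))

(defun color-ok-p (region color assignment adjacency)
  ;; COLOR is admissible if no already-colored neighbor of REGION has it.
  (loop for neighbor in (cdr (assoc region adjacency))
        never (eq color (cdr (assoc neighbor assignment)))))

(defun color-map (regions adjacency assignment)
  ;; Return an alist (region . color), or NIL if no coloring exists.
  (if (null regions)
      assignment
      (loop for c in *colors*
            thereis (and (color-ok-p (car regions) c assignment adjacency)
                         (color-map (cdr regions) adjacency
                                    (acons (car regions) c assignment))))))

;; Example, with the map given as an adjacency alist:
;; (color-map '(a b c d)
;;            '((a b c) (b a c d) (c a b d) (d b c))
;;            nil)
\ecode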
One of these algorithms involved a `postponement heuristic' that
re-ordered goals so that goals that could still be realized regardless of
how the previous goals were achieved were put last. In the map-coloring
case, after some goals have been postponed, still others become
postponable; this process can be repeated.
The second algorithm uses so-called Kempe transformations
to modify assignments already made to variables when the algorithm
gets stuck. It is far superior to backtracking for map-coloring,
but it can still be described as control applied to the original
Pereira-Porto logic. However, this kind of control requires that
the program, when stuck, go into an `introspective mode' wherein
it looks at data structures created by the interpreter that are
not ordinarily accessible to the program itself. This is the
work whose ideas overlap those of Brian Smith. Smith applied them
to Lisp, but has recently been exploring the idea that they are
even more applicable to a logic programming language such as
Prolog.
The two kinds of introspection can be called syntactic
and semantic. The postponement heuristic is syntactic in that
it involves looking at the goals and re-ordering them before
attempting to realize any of them. The Kempe heuristic is
semantic in that it doesn't re-order the goals, but modifies
the order in which alternatives are tried in a task-dependent
manner and requires that the program look at its own stack
of tasks.
McCarthy's results show that the Kowalski doctrine
can be maintained in the map-coloring case. Both algorithms
can be regarded as sophisticated control applied to simple logic.
The control mechanisms required in the two cases go far beyond
what the logic programmers---for example, Keith Clark, Pereira and Porto,
and Herve Gallaire---had been willing to contemplate. While they
have some generality it seems likely that additional problems
will require additional mechanism.
So far it hasn't proved especially convenient to program
by beginning with a simple logic and adding a sophisticated control.
However, such techniques should make it easier to prove the
correctness of programs. Further exploration of the Kowalski
doctrine applied to logic programming may turn up interesting
and useful methods of control and other heuristics.
\ref:McCarthy.1982. ``Coloring Maps and the Kowalski Doctrine''
presents some of these ideas.
\sect Advice-taking ANALYST.
Lewis Creary and Richard Gabriel have been collaborating (Creary full-time, Gabriel
quarter-time) on implementing a prototype version of the advice-taking ANALYST
program. The motivation, design criteria, basic approach, and leading ideas
for this program were described fully in our 1981 proposal and will
not be repeated in detail here. To summarize briefly, the basic goal of the
project is to construct an advanced, advice-taking problem solver for
analyzing interesting kinds of ``intelligence'' information---in short, a
first incarnation of the ``intelligence'' ANALYST mentioned in our 1979
proposal as a potential long-range application. This program will attempt to
infer concealed facts from known facts and from reasonable conjectures that it
has made. It will deal with information about beliefs, desires, purposes,
preferences, aversions, and fears. It will ascribe plans and goals to people
on the basis of their observed actions and commonsense background knowledge,
and it will use these ascribed plans and goals to predict future actions. Our
basic approach to construction of an ANALYST stems from John McCarthy's design
sketch for an ``Advice Taker'' \ref:McCarthy.1959. and presently is based on
the following main ideas:
\ni 1. A general framework for the structuring, description, acquisition, and
use of specialized knowledge as the foundation for a domain-independent
commonsense problem solver. Some of the specialized knowledge will concern
the application domain, and some will be meta-knowledge concerning the
structure, capabilities, etc. of the ANALYST program itself.
\ni 2. Integration of the frames/units and predicate calculus approaches to
knowledge representation.
\ni 3. Extension of the standard predicate calculus formalism to permit
representation of adverbial constructions, sortal quantifiers, information
concerning beliefs, desires and intentions, causal connections between states
of affairs, subjunctive conditional statements, and $λ$-abstraction.
\ni 4. Fregean representation of propositional attitudes, coupled with the
simulative use of the program's own human-like thought mechanisms as a means
of recognizing and analyzing the beliefs, desires, plans, and intentions of
people and other intelligent organisms.
\ni 5. An {\sl interacting considerations} model of commonsense factual
reasoning, in which reasoning involves the weighing and composition of all
reasonably discoverable considerations bearing on a set of target
propositions as the basis for drawing conclusions.
\ni 6. Comparative partial matching as the basis for selection of the most
appropriate methods, procedures, and abstract plan schemata during problem
solving.
\ni 7. Interactive advice seeking and taking as a means of knowledge acquisition.
\ssect Accomplishments.
\ni 1. A new logical language was designed for use by the ANALYST to
represent commonsense information.
An essential requirement for effective commonsense reasoning and
problem-solving is the ability to represent commonsense knowledge about the
everyday world in a way that accurately reflects its logical consequences.
Providing such an ability has not proved easy for AI researchers, since the
standard predicate calculus and its higher-order relatives, while sufficient
for most mathematical purposes, do not lend themselves in a natural way to the
representation of several important types of commonsense proposition. On the
other hand, while non-logical ad-hoc representation schemes can be effective
in special cases, they tend to lack the flexibility and generality needed for
commonsense reasoning, and needlessly forego the benefits of a systematic
truth-semantics that are available to logically-oriented representation
schemes.
To meet this problem a new logical language named NFLT was designed as the
``thought-vehicle'' for the advice-taking ANALYST. `NFLT' is an acronym for
`Neo-Fregean Language of Thought.' NFLT is the result of augmenting and
partially reinterpreting the standard predicate calculus formalism to permit
representation of adverbial constructions; sortal quantifiers; information
concerning beliefs, desires, and intentions; causal connections between states
of affairs; subjunctive conditional statements; and $λ$-abstraction. Many
commonsense descriptions that are difficult or impossible to express
adequately in the standard predicate calculus are expressible directly and
naturally in NFLT. While NFLT is much closer semantically to natural language
than is the standard predicate calculus, and is to some extent inspired by
psychologistic considerations, it is nevertheless a formal logic admitting of
a model-theoretic semantics. The intended semantics incorporates a Fregean
distinction between sense and denotation, with associated principles of
compositionality. A working paper was written which describes this language
\ref:Creary.1982.. Here is a relatively short example of NFLT's expressive power:
\ni English:
\numitem{}Being careful not to move anything else on his desk, John picks
up $\hbox{hammer}↓1$ with his right hand in order to drive a nail with it.
\ni NFLT Display Syntax:
\bcode
$\{∃$ $\up$X.CONCEPT($\up$X, OF:!DESK JOHN)
$\up$Y.CONCEPT($\up$Y, OF:HAMMER1)
Z.NAIL
$\up$X1.CONCEPT($\up$X1, OF:Z)$\}$.PICKUP(JOHN, HAMMER1, FROM:!DESK JOHN,
WITH:!RIGHT-HAND JOHN, INORDERTHAT:$\up$[DRIVE(I, $\down$[$\up$X1],
WITH:$\down$[$\up$Y])], WITHCARETHAT:$\up$[$\{∀$Y1.PHYSOB Y1 $∧$
ON(Y1, $\down$[$\up$X]) $∧$ $¬$(Y1 $= ¬\down$[$\up$Y])$\}$.$¬$MOVE(I, Y1)])
\ecode
\ni (Here, the `$\down$' operator (occurring within the scope of an `$\up$'
operator) locally suppresses further conceptual raising of the variable it
governs, much as the MacLisp `,' (occurring within a backquote construction)
locally suppresses quotation of the S-expression it governs.)
\ni 2. MacLisp programs were written for the translation, display, and logical
analysis of NFLT formulas, and for the construction and management of a
``uniquifying'' discrimination net that stores all NFLT formulas.
The internal syntax of NFLT is variable-free and has many of the features
of semantic-network formalisms. For input purposes only, a somewhat
different Lisp-form syntax is presently used; it is translated
immediately upon entry into the internal formalism. For display purposes
only, still another syntax is used, one that is more concise, much more
easily readable by people than the internal syntax, and closer
to standard logical notation than the input syntax. Thus NFLT is
presently implemented with three distinct syntaxes---one for input, one
for memory and reasoning, and one for display.
NFLT-expressions that are in some way associated with potentially useful
information are presently stored in a discrimination net that maintains a
unique, canonical version of each compound concept and proposition in the
ANALYST's explicit memory. An NFLT-expression is located in the
discrimination net by computing for it a unique list of indexing keys.
The first element of this key-list is a concept-construction operator
that specifies one way in which the embodied concept may be constructed
from component concepts. The remaining members of the key-list are the
component concepts, either atomic or compound. Each logically-compound
component concept is itself indexed in the discrimination net by its own
key-list, consisting of a concept-construction operator and component
concepts. Thus the discrimination net implicitly records for each stored
compound concept a complete analysis into {\sl atomic} component concepts.
Quite apart from its role in the indexing scheme, this conceptual analysis is
of theoretical significance as a demonstration of the compositional semantics
of NFLT.
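
The flavor of the uniquifying scheme can be conveyed by a small sketch
(ours, with hypothetical names, and much simpler than the ANALYST code): a
compound concept is indexed by its key-list, and constructing the same
key-list twice returns the same stored object.
\bcode
(defvar *net* (make-hash-table :test 'equal))

(defstruct concept operator components)

(defun intern-concept (operator components)
  ;; Return the unique concept object for this key-list.
  (let ((key (cons operator components)))
    (or (gethash key *net*)
        (setf (gethash key *net*)
              (make-concept :operator operator :components components)))))

;; Building the same compound concept twice yields one object:
;; (eq (intern-concept 'apply (list 'can-fly 'tweety))
;;     (intern-concept 'apply (list 'can-fly 'tweety)))   ; => T
\ecode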
\ni 3. A matcher for NFLT was designed and partially implemented. It is a
unifier and was implemented by writing a matcher-generator and applying
that generator to a description of the NFLT data-structure. This
matcher-generator is a major result since it allows one to abstractly
describe pattern and datum data structures and to automatically generate a
unifying pattern matcher on those data structures. A significant feature
of the class of matchers that the matcher-generator produces is that they
unify `star-variables.' Star-variables match $0$ or more elements in a
single-level stream of elements. The Gabriel reasoner (described below)
uses a tree-structure unifier which was the model for the
matcher-generator. Recent work on the tree-structure unifier needs to be
incorporated into the matcher-generator and hence the NFLT matcher.
Recent insights concerning the use of subformulas of quantified NFLT
expressions as patterns also need to be taken into account.
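
The idea of star-variables can be conveyed by a toy one-way matcher (ours;
the actual ANALYST matcher is a full unifier generated from a description
of the NFLT data structures, and this sketch does not even check repeated
variables): a pattern element \hbox{(* VAR)} matches zero or more datum
elements at a single level, while a ?VAR matches exactly one.
\bcode
(defun star-var-p (x) (and (consp x) (eq (car x) '*)))

(defun var-p (x)
  (and (symbolp x)
       (> (length (symbol-name x)) 0)
       (string= (symbol-name x) "?" :end1 1)))

(defun match (pattern datum bindings)
  ;; Return an alist of bindings on success, or the symbol FAIL.
  (cond ((null pattern) (if (null datum) bindings 'fail))
        ((star-var-p (car pattern))
         ;; Try binding the star-variable to successively longer prefixes.
         (loop for i from 0 to (length datum)
               for result = (match (cdr pattern) (nthcdr i datum)
                                   (acons (cadr (car pattern))
                                          (subseq datum 0 i) bindings))
               unless (eq result 'fail) return result
               finally (return 'fail)))
        ((null datum) 'fail)
        ((var-p (car pattern))
         (match (cdr pattern) (cdr datum)
                (acons (car pattern) (car datum) bindings)))
        ((equal (car pattern) (car datum))
         (match (cdr pattern) (cdr datum) bindings))
        (t 'fail)))

;; (match '(on (* xs) ?y) '(on a b c table) nil)
;;   => ((?Y . TABLE) (XS A B C))
\ecode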
\ni 4. Epistemological work was done on a scheme for causal reasoning that
will offer new solutions to the frame and qualification problems. This
work is presently being completed and implemented in the ATREAS commonsense
reasoner described in item 7 below.
\ni 5. Detailed epistemological studies were made of some of the
commonsense factual reasoning involved in plan production and of the
interaction of this reasoning with the planning process. This reasoning
was found to be quite complex. As an example of some of the relevant
knowledge that we have formalized in these studies, we present one of the
commonsense causal laws that our reasoner is currently being developed to
utilize, the Law of Causal Influence for Driving.
Here is a rough statement of the law in English: ``If a person
tries to drive a car from location $L↓1$ to location $L↓2$ via drive-path
V, leaving at $T↓1$ and arriving by $T↓2$, then, provided that the car is
located at $L↓1$ at time $T↓1$, and that there is sufficient time for the
intended driving, and provided that certain further conditions are met
concerning the condition of the car, drive-path, and person, this will
tend to cause the person to succeed in his attempted driving.''
The following is a computer-readable formalized version of the law (NFLT
Lisp-form input syntax):
\bcode
((ALL P PERSON
X AUTOMOBILE
$\up$X (CONCEPT $\up$X (OF X))
L1 LOCATION
$\up$L1 (CONCEPT $\up$L1 (OF L1))
L2 LOCATION
$\up$L2 (CONCEPT $\up$L2 (OF L2))
V (DRIVE-PATH V L1 L2)
$\up$V (CONCEPT $\up$V (OF V))
T1 TIME
$\up$T1 (CONCEPT $\up$T1 (OF T1))
T2 TIME
$\up$T2 (CONCEPT $\up$T2 (OF T2)) )
(TENDS-TO-CAUSE
(AND (LOCATED-AT X L1 (ATT T1))
(GREATER (!TIME-DIFFERENCE T1 T2) (!DRIVE-TIME L1 L2 V))
(TRY P ($\up$(DRIVE I (VEHICLE ($\down$ $\up$X))
(FROM ($\down$ $\up$L1))
(TO ($\down$ $\up$L2))
(VIA ($\down$ $\up$V))
(LEAVING-AT ($\down$ $\up$T1))
(ARRIVING-BY ($\down$ $\up$T2)) ) )
(WHILE (AND (RUN-OK X)
(DRIVABLE V)
(PHYSICALLY-ABLE P DRIVE)
(KNOW-HOW P $\up$DRIVE)
(EFFECTIVELY-IDENTIFIABLE
P ($\up$(($λ$ P X L1 L2 V T1 T2)
(DRIVE P (VEHICLE X)
(FROM L1)
(TO L2)
(VIA V)
(LEAVING-AT T1)
(ARRIVING-BY T2) ) ))
(C1 $\up$I) (C2 $\up$X) (C3 $\up$L1) (C4 $\up$L2)
(C5 $\up$V) (C6 $\up$T1) (C7 $\up$T2) ) ))
(START-TIME T1)
(STOPPING-ONLY-FOR-STRONG-REASONS) ) )
(DRIVE P (VEHICLE X)
(FROM L1)
(TO L2)
(VIA V)
(LEAVING-AT T1)
(ARRIVING-BY T2) )
(STRENGTH SUFFICIENT-IF-UNPREVENTED) ) )
\ecode
\ni 6. A storage manager for ANALYST has been implemented and tested. However,
it is not presently being used because the space problems it would help to
alleviate have not yet arisen in our prototype implementations.
\ni 7. Two prototype commonsense-reasoning modules for the ANALYST were implemented
and tested. One was written by Richard Gabriel and is used to explore modes of
reasoning, methods of evidence gathering, and efficiency issues in searching for
information. The second reasoner, written by Lewis Creary, is partially based
on the first, but utilizes a more complex representation language (NFLT) and
reasoning graph, and a redesigned reasoning algorithm. Gabriel's reasoner
is used for rapid experimentation with ideas, while the final ANALYST module
will descend from Creary's reasoner. Results of the most successful
experiments with the former will be incorporated into the latter.
A pervasive and fundamental characteristic of commonsense factual reasoning is
the weighing of all reasonably discoverable relevant considerations before
drawing a conclusion. The main structural components of a simple commonsense
factual inference are a target proposition (the conclusion or its negation)
and a number of separate {\sl considerations} (i.e., sub-arguments) that
bear evidentially on this proposition. The various considerations bearing on
a single target proposition may differ in {\sl directional tendency}
(confirmation or disconfirmation), in {\sl inherent relative strength},
and in {\sl force} (i.e., absolute strength {\sl in situ}). Values of
the first two of these attributes for a given consideration depend only on the
nature of the consideration, whereas the {\sl force} of a consideration in
a particular application depends also on how well its premises are known.
Both of our commonsense reasoning programs attempt to find and evaluate
all reasonably discoverable considerations bearing on the truth of a given
statement. Gabriel's REASON program currently handles 19 specific types
of consideration. Among the types with maximal inherent relative strength
are those corresponding to rules of deductive logic, such as tautological
considerations (supporting statements true because of their logical form),
Boolean considerations (for example, $\hbox{A}∧\hbox{B}$ is true if A and B are
true), modus ponens considerations (B is true if A and
$\hbox{A}⊃\hbox{B}$ are true), identity considerations
($\hbox{P}(\hbox{a})$ is true if $\hbox{b}=\hbox{a}$ and
$\hbox{P}(\hbox{b})$ are true), and quantificational considerations
($\hbox{P}(\hbox{a})$ is true if $∀x.\hbox{P}(x)$ is true). Rewrite rules
are included as reasoning experts to obtain the force of standard logical
deductions. For instance $¬¬\hbox{S}$ is equivalent to S and
$¬∀x.¬\hbox{P}(x)$ is equivalent to $∃x.\hbox{P}(x)$. There are 9 such
reasoning experts.
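
As a toy illustration of such a rewrite-rule expert (our own sketch, not
the REASON code), the following function removes double negations wherever
they occur in a formula:
\bcode
(defun simplify-not-not (formula)
  ;; Rewrite (NOT (NOT S)) to S, recursively.
  (cond ((atom formula) formula)
        ((and (eq (car formula) 'not)
              (consp (cadr formula))
              (eq (car (cadr formula)) 'not))
         (simplify-not-not (cadr (cadr formula))))
        (t (mapcar (function simplify-not-not) formula))))

;; (simplify-not-not '(and (not (not (p a))) (q b)))  ; => (AND (P A) (Q B))
\ecode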
Among the consideration-types with less-than-maximal inherent relative
strength is the {\sl statistical} consideration, depending on a premise
containing a statistical quantifier, such as `GREAT-MAJORITY.' When the
truth of $\hbox{can-fly}(\hbox{tweety})$ is supported by the statements
$\hbox{bird}(\hbox{tweety})$ and
$((\hbox{GREAT-MAJORITY\ }\hbox{x\ }\hbox{bird})\hbox{can-fly}(\hbox{x}))$,
the support is less strong than it would be if `GREAT-MAJORITY' were replaced
by `ALL.' Stated another way, if one knows with certainty that Tweety is a
bird and that the great majority of birds can fly, then, provided that this
knowledge is not preempted by other statistical knowledge one has about
Tweety, it gives rise to a statistical consideration in support of the
statement that Tweety can fly. While this consideration is genuine, it
provides no certainty that Tweety can fly. On the other hand, if one knows
with certainty that Tweety is a bird and that {\sl all} birds can fly,
then there exists a deductive consideration conferring certainty on the
conclusion that Tweety can fly.
There are other non-deductive consideration-types handled by Gabriel's
program, but they are presently subject to debate and possible revision.
\eat
Other non-deductive consideration-types (in addition to the statistical)
handled by Gabriel's reasoner are in a class of interesting
consideration-types which results from the observation that $P(a)$ may not
be the case, but perhaps $∃x.x=a∧P(x)$. This mode of reasoning can
continue with $∃y.∃x.y=x∧a=x∧P(y)$. Since this line of reasoning leads the
program to try to enumerate all of the individuals that it knows about,
cutting off the search at some point and reasoning about the extent of the
list collected so far must be performed if any conclusion is to be made.
For example, if a particular penguin is known, and if it is known that
this particular penguin is the only possible candidate penguin that could
be identified as Tweety, then it is only necessary to look to that penguin
in the search to make a strong case that if Tweety is not this particular
penguin, that Tweety is not, then, a penguin, and that Tweety can fly.
This mode of reasoning is similar to one that Allan Collins
suggested. That mode allowed one to state that if some fact were true,
then it would be important enough to be known.
Two reasoning experts are currently included in this class, along with an
expert to control combinatorial explosion during the search process.
A further embellishment on this mode of reasoning concerns the names
that are given to individuals. For instance, Tweety is represented
to the reasoning program as a data structure---a memory node of sorts---which
includes a name as part. Thus it is explicitly stated that the name of
the individual represented by the node corresponding to Tweety is `Tweety.'
There is a reasoning expert that infers that most individuals with different
names are distinct.
\endeat
The consideration-gathering process consists of building what was originally
conceived as a tree of dependencies, where the daughters of a node represent
the considerations that bear on that node. The original tree-building process
was such that it was possible for two distinct nodes in the tree to have the
same content (with different links). However, it was found experimentally
that if care is not taken to avoid such a duplication of node-contents,
serious inefficiencies in searching can arise. For this reason, the
consideration-dependency-network is now a graph that is usually not
a tree, with each node having a unique content. A node in this graph is a
content-triple comprising a target proposition, a reasoning expert, and a
reasoning context, together with various links and pointers. The uniqueness
of node-contents has been preserved (but with a changed notion of content) in
Creary's reasoner.
Creary's ATREAS commonsense-reasoning module for the ANALYST utilizes the
new representation-language, NFLT, and a very general reasoning graph capable
of supporting both forward and backward factual reasoning. The reasoning
graph is the main data structure used by the commonsense reasoner and
consists of complex propositional nodes connected by complex labelled links
representing considerations (i.e., logical sub-arguments both for and against
various target propositions). Commonsense reasoning begins with a specific
goal---to evaluate the epistemic status of a particular (usually small) set of
target propositions. This goal, together with a certain allotment of
information-processing effort that may be expended on it, arises within some
larger problem-solving context involving factual inquiry, planning,
deliberation, or the like. A goal-directed, heuristically-guided search is
conducted for considerations both for and against the target propositions.
This search may give rise to new target propositions, for which considerations
are also sought, in their turn. The consideration-search terminates when
either it runs out of target propositions or the allotted effort (currently
in arbitrary units) has been expended. Finally all the considerations
discovered are evaluated and composed and a conclusion is drawn, if
warranted.
The search for considerations is directed by specialized reasoning experts.
In a full bi-directional implementation there will exist both a forward and a
backward reasoning expert for each rule of evidence. At present only a few
of the backward reasoning experts have been implemented. Currently under active
development are more reasoning experts (both deductive and non-deductive) and
heuristic knowledge for guidance of the consideration-search.
There is presently on-line at SAIL a small demonstration version of the ATREAS
system, which can be run in order to get a feel for the general character of
the reasoning program, the major data structures that it uses, its supporting
interactive environment, and the NFLT representation language. The premises
available for the demonstration include information about birds and penguins
in general, and about a particular penguin named `Slick.' Given only that
Slick is a bird, that the great majority of birds can fly, and that no
penguins can fly, the program concludes it is very likely that Slick can fly.
However, if we add to these same premises the further information that Slick
is a penguin, the program will then conclude it is most {\sl un}likely
that Slick can fly. This demonstration illustrates what is now widely
referred to as the `non-monotonicity' of commonsense reasoning: the fact
that the mere addition of new premises can invalidate a previously reached
conclusion---something that cannot happen in purely deductive reasoning.
The theoretical model of commonsense reasoning on which this reasoning program
is based is discussed in a forthcoming technical report \ref:Creary.1983..
\ni 8. A substantial interactive research environment for Creary's
commonsense reasoner was implemented, tested, and documented on-line. It
consists of an interactive program control and four co-routined
sub-programs for exercise of the reasoner and examination of data
structures. These sub-programs are as follows:
\numitem{a.}EXERCISE-COMMONSENSE-REASONER (permits convenient interactive
exercise of the commonsense reasoning program).
\numitem{b.}EXPLORE-TASK-RECORD (permits convenient interactive examination of
a previously executed agenda of reasoning tasks).
\numitem{c.}EXPLORE-REASONING-GRAPH (permits convenient interactive examination
of reasoning graphs produced by the commonsense reasoner). An important and
particularly useful feature of this sub-program is its ability to provide
concise, easily readable summaries of all the considerations found by ATREAS
during the reasoning process.
\numitem{d.}EXPLORE-DISCRIMINATION-NET (permits convenient interactive
examination of a discrimination net that uniquely indexes logically compound
concepts and propositions).
These programs relieve much of the tedium that would otherwise be involved in
experimenting with the ATREAS reasoner and permit efficient high-level
debugging of inference rules and factual information. The programs presently
have a combined repertoire of 58 high-level commands, each one requiring 2 or
3 keystrokes for the command mnemonic, and (typically) from 0 to 3 arguments.
These programs are largely self-documenting through the use of
help-commands that offer information at three different levels of detail.
\ssect Implementation Statistics.
The programs described in items 7 and 8 above presently comprise over 10,000
lines (excluding blanks) of sparsely commented (but fairly readable) MacLisp
code. This breaks down roughly as follows:
$$\vcenter{\halign to size{\hfill#\tabskip 0pt plus 2pt
⊗#\hfill\tabskip 0pt\cr
2700 lines:⊗REASON commonsense reasoner (Gabriel)\cr
200 lines:⊗Tautology checker (Gabriel)\cr
1400 lines:⊗ATREAS commonsense reasoner (Creary)\cr
600 lines:⊗Context mechanism shared by both reasoners (Gabriel)\cr
2400 lines:⊗Interactive research environment for ATREAS/NFLT (Creary)\cr
2800 lines:⊗NFLT: programs for translation, display, logical analysis,\cr
{}⊗and uniquifying formula storage (Creary)\cr
400 lines:⊗Conceptual structures for ATREAS/NFLT (Creary)\cr}}$$
These are the programs that we are currently using on a regular basis in our
experimentation and development work.
\sect Formalisms for Reasoning about Programs.
Carolyn Talcott has been studying formal systems and structures for representing
and proving properties of programs, including
computations, programming systems, and programming environments.
One objective is to account for and elucidate programming practices common
in the Artificial Intelligence community. This includes studying the
kinds of programs and structures used both in building systems for
representing knowledge, reasoning, and planning, and in building systems
for building systems.
An important aspect of the AI style of programming is the use of programs
and other complex structures in a variety of ways. Some examples are:
\ni 1. Using programs to compute (partial) functions. This is the `obvious'
use of programs. The important point is that the data structures these
programs use often represent complex objects of the sort not usually
treated in work on program verification or specification.
\ni 2. Using programs as functional arguments.
For example, a mapping function takes a function and a structure
as arguments and produces a new structure by applying the given
function to each component of the input structure.
Search routines are often parameterized by functions for
generating successor nodes or by functions for evaluating positions.
\ni 3. Using programs as functional values. Specifying some of the
arguments of a function and then `partially' applying that function
produces a second function as a value. The function
$λ(\hbox{x},\hbox{y})\hbox{x}+\hbox{y}$, when
supplied with the argument 7 for the parameter y, yields the function
$λ(\hbox{x})\hbox{x}+7$, which adds 7 to its
argument. Combinators applied to functions return functions as values.
For example $\hbox{compose}(\hbox{f},\hbox{g})(\hbox{x}) =
\hbox{f}(\hbox{g}(\hbox{x}))$ and
$\hbox{subs}(\hbox{f},\hbox{g})(\hbox{x}) =
(\hbox{f}(\hbox{x}))(\hbox{g}(\hbox{x}))$.
\ni 4. Using programs to represent procedural information.
Information in data bases, knowledge, and problem solving strategies
may be represented procedurally (as a program).
\ni 5. Using programs to represent infinite or partially completed objects
such as streams, sets, or continuations. Suppose there are many solutions
to a given problem. One means of representing the solutions is by a program
which, when queried, produces a solution and another program representing
the remaining solutions.
\ni 6. Using programs as functions that specify control structure.
Co-routines (co-operating processes) can be expressed using functions as
can many forms of delayed evaluation. For example $λ()\hbox{f}(\hbox{x})$
represents f applied to x, but will only be evaluated if explicitly called
for (by application to an empty list of arguments).
\ni 7. Using programs as `actors' and as behavioral descriptions of complex
data structures. That is, instead of implementing a data structure as
a block of storage along with a set of programs for manipulating that
storage, one can write a program which takes manipulation names
and values as arguments and which updates its internal environment
to achieve the effect of the desired data structure, perhaps returning a
program representing the updated data structure as a value.
\ni 8. Using programs as objects that can be constructed, transformed,
interpreted, and compiled. We include here definition of data structures,
compiling, and program synthesis.
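
A few of these uses, namely programs as functional values (item 3),
programs representing infinite objects such as streams (item 5), and
programs as delayed evaluations (item 6), can be conveyed by a short Lisp
sketch (ours, purely illustrative):
\bcode
;; Item 3: a function that returns a function as its value, as in the
;; lambda example above.
(defun make-adder (y)
  (lambda (x) (+ x y)))          ; supplying y = 7 yields "add 7"

(defun compose (f g)
  (lambda (x) (funcall f (funcall g x))))

;; (funcall (make-adder 7) 5)                           ; => 12
;; (funcall (compose (make-adder 1) (function abs)) -3) ; => 4

;; Item 5: an infinite stream represented by a program that, when queried,
;; produces an element and a program representing the rest.
(defun integers-from (n)
  (lambda () (cons n (integers-from (+ n 1)))))

;; (car (funcall (integers-from 0)))                    ; => 0

;; Item 6: delayed evaluation; the thunk is evaluated only when explicitly
;; called for, by applying it to an empty list of arguments.
(defun delay-call (f x)
  (lambda () (funcall f x)))
\ecode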
Other aspects of this style of programming include non-local control
mechanisms such as catch and throw, macros and metafunctions (for example,
to define new constructs or implement convenient abbreviations), `hooks'
into the underlying system (hooks provide dynamic access to the symbol
table and other data structures used by the interpreter and other system
routines), and use (often interchangeably) of both compiled and
interpreted code.
\ssect Goals and applications.
Our goal is to be able to treat the diverse uses of programs in an
integrated system.
We want to develop informal and formal methods to express and prove
properties that reflect naturally the intent of system designers and
programmers. The informal part is based on a model of computation
(computation structure) that naturally accounts for the various styles of
programming encountered in AI systems. There are two ways to mechanize
this model. One is to encode the structure as data. The second is to
define a formal theory for proving assertions about the structure. The
connection between the two provides additional means of expressing
properties of the system.
Although the initial work has been abstract and technical, there
are many practical applications. Some of these are summarized
below along with some important extensions.
\ni 1. Mechanization of the system via formalization of the metatheory and
model theory will provide an implementation of the system and a basis for
mechanical proof checking. It also will provide tools for building
systems---by providing data types corresponding to some of the complex
structure used in these systems.
\ni 2. We will be able to prove properties of `real' Lisp programs, such as
programs that contain arbitrary assignments, non-local control such as
CATCH and THROW, as well as programs containing MACROs and META functions.
We will also treat programs that act on memory structures---such programs
in Lisp use operations that are destructive on list structure, such as
RPLACA and RPLACD.
\ni 3. Abstract data structures (domains and operations) will be formulated in
a natural manner within the proposed system. Thus it will be used as a
framework for understanding data abstraction and, in particular, for
analyzing the notion of parameterized data type, which has been the subject
of recent investigation from several points of view. See, for example,
\ref:Thatcher.1979. and \ref:Cartwright.1980..
\ni 4. We will formalize the notion of a computation system as computation
theory plus implementation. Here we move from the static aspects of a
computation theory to the dynamic aspects of interactive systems. An
interactive and dynamically changing system will be explained in terms of a
theory, a model, and an interpretation (dynamically) connecting the two.
\ni 5. Formalization of the dynamic aspects of a programming system will
provide a framework for talking about declarations and definitions,
debugging, handling ERROR conditions, and distinguishing between different
kinds of undefinedness---abnormal termination versus non-termination. A
clean, simple description of a system makes provision for and use of debugging
facilities easier, more straightforward, and more effective.
\ni 6. We will be able to develop a model and implementation for logic
programs. Easily identified, mathematically smooth subsets of the
computation theory can be interpreted as a model for logic programs. Our
constructions are uniformly parameterized by the data domains on which
computation is being defined. This makes the connection with logic
programs ($\hbox{logic} (=\hbox{data})+\hbox{control} (=\hbox{PFN})$)
natural. A `PFN' is a computational equivalent to a partial function.
\ni 7. Ideas for expressing properties of high-level program descriptions
are useful for compiling and automatic program optimizations.
\ssect Understanding programming systems.
The sort of programming system we wish to study is a full interactive
Lisp-like system including language, interpreter, compiler, and debugger.
Our approach to understanding such a system---for instance, to be able to
make precise statements about its behavior, semantics, and extensions---is
to begin with a model of computation (computation structure) that reflects
the computational aspects of the system. We then consider encodings of
this model in suitable data domains.
Within our model the semantics of a programming language can be expressed
in a number of ways, for example as the function computed by an
applicative program or by using structures representing the computation
defined by a program. The connections between a program, the function it
computes, and the computation it defines can be made precise. The
different views of programs (a) as functions---which may take functions as
arguments and return functions as values---and (b) as data can be
mechanized. This is crucial for applications of the theory to other areas
of reasoning such as building models or plans.
\ssect Construction of computation structures.
A computation structure contains data, PFN's, a means of evaluating
expressions, a means of applying PFN's to arguments, and notions of
subcomputation. We begin with a data structure---a collection of data and
operations taken to be primitive---and construct from it a computation
structure. Computation is defined using conditional expressions,
abstraction, application, environments, and closures. There is a natural
domain of values extending the basic data domain and including all PFN's
as values. Applicative, imperative, iterative, recursive, and many other
styles of programming are naturally expressible as PFN's.
Computation structures are uniformly parameterized by data structures
in the sense that the constructions define an effective mapping from
data structures to computation structures.
Two notions of computation are distinguished, which are called
`computation over a data structure' and `computation over a memory
structure.' Computations over data structures use only the primitive
operations that construct and examine data; for example, in pure Lisp
they are CAR, CDR, CONS, EQUAL, and ATOM. Computations over memory structure
use, in addition, destructive primitive operations, such as RPLAC's and
array assignments. The same basic language serves to define either form
of computation.
Our notion of computation theory draws on ideas of an equation calculus
\ref:Kleene.1952. \ref:McCarthy.1963., hereditarily consistent functionals
\ref:Platek.1966., abstract recursion theory \ref:Moschovakis.1969.
\ref:Fenstad.1980., and
primitive recursive functionals \ref:G\"odel.1958.. The resulting programming
system is related to SCHEME \ref:Steele.1975..
\ssect Multiple arguments and values.
In a computation structure PFN's are applied to a single argument which is
a `cart' of objects. The result of applying a PFN, when defined, is a
cart of values. Carts are like lists, but in a computation the list itself
is never constructed; they are, in effect, virtual lists. They provide a natural means of
expressing computations with multiple arguments and multiple values. The
use of carts suggests a natural extension to the usual methods for
algebraic specification of data types. It also suggests an interesting
class of (rational) operations on data domains.
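
Common Lisp's multiple values give a rough, purely illustrative analogue of
carts: a PFN may consume several arguments and produce several values
without any list being constructed. For example:
\bcode
(defun divide (n d)
  ;; Consumes a cart of two arguments, produces a cart of two values.
  (values (floor n d) (mod n d)))   ; quotient and remainder

;; (multiple-value-bind (q r) (divide 17 5) (list q r))  ; => (3 2)
\ecode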
\ssect Subsystems.
A useful technique for reasoning about programs is to identify natural
subclasses of PFN's and to formulate natural embeddings
among the classes where appropriate. Specification of properties and
reasoning can then be carried out in the corresponding limited domain.
In our basic model many useful classes arise naturally from purely
`syntactic' considerations. These include purely applicative,
simple imperative (loops with assignment limited to local
context), mixed imperative and applicative, and iterative.
There are also notions of `shape' and of `well-shaped' PFN's and
computations that seem to be useful for identifying interesting classes of
PFN's. Shape is an intensional---essentially syntactic---analog to the
logical notion of type. Logical types are sets constructed from base sets
using constructions like cartesian product and exponential (function-space
formation). See \ref:Scott.1975., \ref:Feferman.1977., or \ref:Feferman.1981..
Shape is related to notions of type or functionality used in combinatory
logic \ref:Hindley.1969. and \ref:Coppo.1980..
Shape is also related to notions of type used by programming and proof
systems that allow parameterized types \ref:Milner.1978..
Certain properties and principles hold for a PFN by virtue of
its being well-shaped, and this can be used to simplify and guide proofs.
It can also be useful in the design of programs and for expressing
properties that may be of use to a compiler.
Another way of identifying interesting classes is to embed one computation
structure in another. For example, to take advantage of methods developed
in temporal logic one would embed a temporal model of computation and
apply the temporal methods to the resulting subsystem.
\ssect Computations and programs as data.
The mechanization (formal encoding) of a computation structure turns PFN's
and computations into data allowing us to speak of operations on PFN's and
on computations. In addition to being able to represent operations, we
can discuss properties of these operations. Some properties of interest
are:
\numitem{i.}Soundness---preserving the function computed;
\numitem{ii.}Effect of an operation on the properties of the computation defined by a
program. Properties of interest include
recursion depth, memory usage, and other measures of execution cost;
\numitem{iii.}Program size.
The traditional kinds of operations on programs include transformations,
translations and program construction.
A further kind of operation on programs is the construction of derived
programs; this is based on an idea of McCarthy. Derived programs compute
properties of the original program. Complexity measures can be expressed
via derived programs. For example, depth of recursion, number of CONSes
executed, or number of subroutine calls are typical complexity measures
for Lisp. Properties of these derived programs are similar to
properties treated in the traditional work in analysis of algorithms.
Many other properties of a program can be computed by derived programs.
Some examples are structures representing traces or complete computation
trees and other structures useful for debugging.
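
As a small illustration (ours) of the idea, alongside a recursive APPEND
one can write the derived program that computes the number of CONSes
APPEND would execute on the same arguments:
\bcode
(defun my-append (x y)
  (if (null x) y (cons (car x) (my-append (cdr x) y))))

(defun my-append-conses (x y)
  ;; Derived program: number of CONSes executed by MY-APPEND on X and Y.
  (if (null x) 0 (+ 1 (my-append-conses (cdr x) y))))

;; (my-append '(a b) '(c))         ; => (A B C)
;; (my-append-conses '(a b) '(c))  ; => 2
\ecode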
\ssect Notions of equivalence on computation structures.
When programs are used in the different contexts and with the variety
of interpretations discussed above, questions such as correctness of
program transformations become complex. In order to specify the intent
and correctness of programs and operations on programs we must be able
to express the precise sense in which programs are to be considered equivalent.
The classical (extensional) notion is that PFN's are equivalent if
they are defined on the same domain and have the same value on all arguments
in the domain of definition. When arguments and values can be PFN's this
is not an adequate definition.
Notions of equivalence are also important simply to express the fact that
different programs define the same computations or have the same
behavior.
\ssect Criteria for an adequate formal system for reasoning about PFN's.
In an adequate formal system definitions and assertions as well as
reasoning about computation structures should be naturally and efficiently
expressed. This implies a number of criteria for such a system.
\ni 1. Data and PFN's should be part of the domain of discourse. PFN's should
behave, in some respects, as functions. They should also be able to be
arguments to and values of PFN's. The collection of PFN's that can be
proved to exist should be closed under the usual constructions on
computable functions. These constructions include conditionalization,
abstraction, application, composition, closure, iteration (corresponding
to loop programs), and recursion. Combinatory operations on PFN's should
be representable---as PFN's. There should be principles for introducing
schemata for proving properties of special classes of PFN's.
\ni 2. There should be a means of introducing domains (sets) and operations
on domains including boolean operations, cartesian product, and formation of
finite sequences or trees. Limited principles of inductive definition
together with the corresponding principles for proof by induction are also
needed. Domains may contain data, PFN's, or both.
\ni 3. The system should be `parameterized' by the intended computation
domain (the primitive data types and operations). This reflects the
principle of separation of control and data which is important for
modularity and stability of a computation system.
There have been many systems developed by logicians for formalizing
various kinds of mathematics including constructive analysis, constructive
algebra, and logic. Many of the results can be easily modified to apply
to reasoning about programs. Of particular interest is the work of
\ref:Feferman.1974., \ref:Feferman.1978., and \ref:Feferman.1981. on formal
systems for constructive and explicit mathematics (analysis). Other
relevant work includes \ref:Beeson.1980., \ref:Martin-L\"of.1979.,
\ref:Nordstr\"om.1981., \ref:Scott.1977., and \ref:Tennant.1975..
\ssect Accomplishments.
\ni 1. The definition of the applicative fragment of the computation structure
(called RUM) has been completed. This includes the associated notions of
evaluation of expressions, application of PFN's, and the subcomputation
relation.
\ni 2. A variety of simple examples have been worked out in RUM. These include:
\numitem{i.}representation of numbers and number-theoretic functions.
\numitem{ii.}definition of several combinators---as examples of the
expressiveness of both PFN's and of multiple arguments and values.
\numitem{iii.}computing with streams/continuations---statement and proof of
correctness of a simple pattern matcher using continuations to do
backtracking. The pattern matcher produces a stream of matches. (A
simplified sketch of this continuation-passing style appears at the end
of this list of accomplishments.)
\numitem{iv.}representation of tree-like structures using PFN's.
\numitem{v.}representation of re-entrant (circular) memory structures using PFN's.
These are essentially the structures implicit in PROLOG when `occur check'
is omitted from unification. Using PFN's, structures corresponding to
solutions of sets of equations can be represented, but it seems that
destructive operations are not representable as (extensional) operations
on PFN's.
Detailed proofs of properties of the structures and PFN's defined were given.
\ni 3. RUM was extended to include CATCH and THROW (non-local control)
constructs. Example proofs of properties of PFN's using these constructs
were worked out. A computation-preserving map from the extended RUM to the
original was defined---CATCH elimination.
\ni 4. We defined a class of PFN's called iterative PFN's which extends the
usual `first order' notion of (flowchart) programs \ref:Manna.1978.. We
generalized the invariant assertion, intermittent assertion, and subgoal
assertion methods of verification to this class of PFN's. We proved the
Manna formulae \ref:Manna.1969. for partial and total correctness in this
setting and also the equivalence of his total correctness formula to the
intermittent assertions formula.
\ni 5. A Computation Induction scheme for recursively defined PFN's was defined
and used in many of the examples.
\ni 6. A paper describing the above work is in preparation.
\ni 7. Some progress has been made towards a formal theory. Much work remains
to be done.
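The following sketch (ours, and deliberately simplified) conveys the
continuation-passing style of matching referred to in item 2(iii) above.
MATCH calls its WIN continuation with the bindings found together with a
LOSE continuation that can be invoked to ask for further matches; in this
stripped-down form, without segment variables, there is at most one
match, but the protocol is the one that yields a stream of matches in the
full development. The names MATCH, MATCH-VAR, and VAR-P are ours.
\bcode
;; Pattern variables are written (? NAME).  WIN receives the
;; bindings and a continuation for requesting more matches; LOSE
;; takes no arguments and reports failure.
(defun match (pat exp bindings win lose)
  (cond ((var-p pat) (match-var pat exp bindings win lose))
        ((atom pat)
         (if (eql pat exp) (funcall win bindings lose) (funcall lose)))
        ((atom exp) (funcall lose))
        (t (match (car pat) (car exp) bindings
                  (function (lambda (b more)
                    (match (cdr pat) (cdr exp) b win more)))
                  lose))))

(defun var-p (x) (and (consp x) (eq (car x) '?)))

(defun match-var (v exp bindings win lose)
  (let ((entry (assoc v bindings :test (function equal))))
    (cond ((null entry)
           (funcall win (cons (cons v exp) bindings) lose))
          ((equal (cdr entry) exp) (funcall win bindings lose))
          (t (funcall lose)))))

;; (match '((? x) b (? x)) '(a b a) nil
;;        (function (lambda (b more) b))
;;        (function (lambda () 'no)))
;;   evaluates to   (((? X) . A))
\ecode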
\sect EKL.
Jussi Ketonen and Joseph Weening have been working on further development of
EKL, an interactive proof checker in high-order predicate calculus.
The emphasis has been on creating a system and a language that would
allow the expression and verification of mathematical facts in a direct
and natural way. The approach taken has been quite successful: Ketonen has
been able to produce an elegant and eminently readable proof of Ramsey's
theorem in under 100 lines. At the same time EKL is versatile enough to be
able to verify the associativity of the Lisp APPEND function in just one
line.
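For reference, the function and the theorem in question are shown below
in ordinary Lisp (our rendering; the one-line EKL formulation itself is
not reproduced here).
\bcode
;; The usual recursive definition of two-argument APPEND.
(defun append2 (x y)
  (cond ((null x) y)
        (t (cons (car x) (append2 (cdr x) y)))))

;; The theorem verified: for all lists X, Y, and Z,
;;   (append2 (append2 x y) z) = (append2 x (append2 y z)).
;; A standard proof is by structural induction on X.
\ecode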
The above results are important in that they indicate that programs
previously thought to be too complex for mechanical verification are now
within the reach of sufficiently powerful proof-checking systems.
\ssect Accomplishments.
\ni 1. A manual reflecting the state of EKL during December 1982 was written
by Weening and Ketonen.
\ni 2. EKL has been tested by its use in CS206, a course on Lisp programming
at Stanford. As homework assignments, students used it to prove facts
about Lisp programs.
\ni 3. One of the outgrowths of this development work has been a formalisation
of the underlying logic of EKL. EKL was based on a high-order predicate
logic since we felt that it is important to be able to discuss functionals
and lambda-abstraction in a natural way. At the same time it seemed to us
that the currently fashionable formalisations of high-order logic were
still inadequate for expressing mathematical facts. Our approach was to
gradually modify the logic of EKL to accommodate intuitively
straightforward extensions without worrying too much about the formal
underpinnings. It is satisfying to note that we have now come full
circle: Ketonen has shown that the logic of EKL can be formally expressed
(along with its consistency proofs, etc.) in a very elegant and precise
way---in fact, in a more elegant way than the logics we started out with.
This theory also embodies the way EKL can talk about meta-theoretic
entities---to our knowledge it is one of the first instances of a logical theory
of mathematics that can discuss denotations in a coherent way. Ketonen
and Weening are in the process of writing a paper on this. A large amount
of effort has been spent on the EKL rewriting system---currently the most
powerful and versatile component of EKL. Ketonen has come up with a new
architecture for rewriting systems that is quite different from the ones
currently used, say, by Boyer and Moore. Accompanying the rewriter there
is also a notion of a language for rewriting---how to control the process
through simple instructions to EKL. This has turned out to be a fairly
powerful tool in reducing the lengths of proofs in EKL.
\ni 4. A decision procedure for an important fragment of the `obvious' theorems
of first order logic was designed and implemented.
\sect Lisp Performance Evaluation.
Richard Gabriel has been studying the issue of performance evaluation of Lisp
systems, both in general by studying the ingredients of performance
of a Lisp implementation and in particular by benchmarking a number
of important existing Lisp implementations.
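To give the flavor of these measurements, the sketch below shows a
call-intensive micro-benchmark of the kind used in such studies; the TAK
function shown here is a widely cited example of the genre, though it
should not be taken as a description of the particular suite, which is
documented in \ref:Gabriel.1982..
\bcode
;; A call-intensive micro-benchmark.  Its running time is dominated
;; by function call overhead and small-integer arithmetic.
(defun tak (x y z)
  (if (not (< y x))
      z
      (tak (tak (1- x) y z)
           (tak (1- y) z x)
           (tak (1- z) x y))))

;; A typical timed call:  (tak 18 12 6)   evaluates to   7
\ecode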
\ssect Accomplishments.
\ni 1. Nineteen Lisp benchmarks were designed. At the Lisp and Functional
Programming Conference, the attendees interested in performance evaluation
stated that they would prefer to see Richard Gabriel do all of the
benchmarking personally. Therefore, he is currently benchmarking those
implementations most often inquired about. These are the Xerox D series
(Dolphin, Dorado, and Dandelion), the Symbolics LM-2 and 3600, Vax Lisps
(Franz, DEC/CMU Common Lisp, InterLisp-Vax, NIL, and PSL), MC68000-based
Lisps (Apollo running PSL, SUN running PSL and Franz, and HP-9836 running
PSL), and DEC-10/20 Lisps (MacLisp, InterLisp-10, and Elisp).
\ni 2. One benchmark has been run on all of the various Lisp configurations, and
there are 75 measurements. These results have been made available to several
ARPA contractors to aid hardware acquisition decisions.
\ni 3. A paper, ``Performance of Lisp Systems,'' has been written (with Larry
Masinter of Xerox PARC) and published in the proceedings of the ACM
Symposium on Lisp and Functional Programming; this paper discusses the
methodological issues for performance evaluation and presents some of the
results mentioned in 1. This paper will be part of the final report
for ARPA contract N0039-82-C-0250.
\ni 4. Some qualitative statements on the Lisp systems in the study have been
gathered and will be included in the above-mentioned final report. These are
facts about the Lisp systems that are
of interest in comparing implementations and which are not performance-centered.
\ni 5. A detailed analysis of the DEC/CMU Vax Common Lisp has been completed by
Richard Gabriel and Martin Griss (from the University of Utah). This analysis
is being prepared for publication.
\sect Common Lisp.
Both Richard Gabriel and John McCarthy have been active in the
development and design of Common Lisp.
Common Lisp is the combined effort of eight different Lisp implementation
groups aimed at producing a common dialect of Lisp while allowing each
group to exploit its own hardware. These groups are: Spice Lisp at CMU,
DEC Common Lisp on Vax at CMU, DEC Common Lisp on DEC-20 at Rutgers, S-1
Lisp at LLNL, Symbolics Common Lisp, LMI Common Lisp, Portable Standard
Lisp at Utah, and Vax NIL. Common Lisp is a set of documents, a language
design, and a common body of code.
The main goal of Common Lisp is to unify the experiences of the
Lisp implementations that are descended, either historically or
philosophically, from MacLisp. MacLisp is a dialect of Lisp that
emphasizes ease of system-building and efficient execution of numeric
code.
Other goals of Common Lisp are to provide a workbench programming
language for artificial intelligence, symbolic algebra, robotics,
and education for the next decade. With such a commonly available
and commonly used Lisp it would be possible for researchers to
exchange programs with a minimum of effort, even though the host
computers could be very different. Such a Lisp would be powerful enough
to write a sophisticated operating system sufficient to support its
own use and would, thereby, prove to be a useful systems research tool
as well as a tool for creating Lisp machine environments.
With a powerful Lisp widely available, research centers might not have
to consider the language features of the Lisp(s) available on various
computers as part of the decision regarding what hardware to purchase.
In this way a more intelligent and effective choice could be made.
Common Lisp should support an easily-understood, portable programming style.
By making a large number of packages written in Lisp available it is hoped
that not only would the amount of work needed to get a system running be
low, but the overall programming style and methodology would be improved
through exposure to the code written by expert programmers.
\ssect History.
After the usefulness of the PDP-10 as a Lisp engine declined because its
address space was limited to 18 bits, several implementation groups
started to implement Lisps for computers with larger address spaces, and
these implementors tried to improve and extend MacLisp, both to take
advantage of lessons learned from MacLisp and the various languages built
on top of it (Scheme, for example) and to also exploit the architectures
the implementors were using.
It soon became clear to several of the implementors (Richard Gabriel of
Stanford and Lawrence Livermore National Laboratory, Guy L. Steele Jr. of
Carnegie-Mellon University, Jon L. White then of the Massachusetts
Institute of Technology, and Scott Fahlman of Carnegie-Mellon University)
that the situation of four different Lisps---Vax NIL, SPICE Lisp, Lisp
Machine Lisp, and Franz Lisp---all descended from MacLisp, all used by
ARPA-sponsored institutions, and each different from the others in subtle,
yet important, ways, was intolerable to the research community. Several
key meetings among the implementors resulted in an initial commitment to
unify the approaches in the form of a large, common subset, which was to
be called `Common Lisp.' Of the four major Lisp implementations, only
Franz Lisp did not appear interested in joining the Common Lisp group,
for financial rather than philosophical reasons.
Since this informal group got together, the language, Common Lisp,
has been largely designed and a manual written that describes the
language at user-level. The original informal group has expanded into a
larger, formal committee which is collaborating on the language design and
documentation in a loose fashion. This group, called the `Common Lisp
Group', maintains a mailing list and an archive of working messages at
SAIL (Stanford Artificial Intelligence Laboratory).
Many decisions have been made about the political organization of Common
Lisp and about what the manual does and does not require of an
implementation.
To be most useful, Common Lisp must support a large body of packages and
routines, so that a tradition of code, so to speak, is maintained. For
example, pattern matchers, object-oriented programming packages, and
indexing routines should be available so that projects requiring these
tools can use existing, well-tested code. In addition, these packages
ought to be of certified quality in both functionality and documentation.
If Common Lisp is to be widely used it must be available on hardware not
currently supporting Common Lisp dialects. Since Common Lisp is a large
language, producing a working Common Lisp simply from the manual would
require several man-years of effort. One of the next tasks to be
undertaken is to define Common Lisp in terms of a {\sl virtual machine}
description.
Such a description is a formal definition of a number of primitives that
must be defined on the hardware in question. Then the remainder of Common
Lisp is defined as a body of Common Lisp code written with the assumption
that these primitives exist. Once these primitives are defined on a piece
of hardware, a Common Lisp interpreter is gained by `loading' the body of
Common Lisp code.
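A minimal sketch of the idea follows. The primitive names used here
(VM-CAR, VM-CDR, VM-NULL) are invented for the illustration and are not
the actual virtual machine definition; in a real port they would be
implemented directly on the target hardware, whereas here they are simply
aliased to existing operations so that the sketch is self-contained.
\bcode
;; Hypothetical virtual-machine primitives.  A port to new hardware
;; implements only these.
(defun vm-car (x) (car x))
(defun vm-cdr (x) (cdr x))
(defun vm-null (x) (null x))

;; The bulk of the system is portable Lisp code written solely in
;; terms of the primitives and loaded unchanged on every machine.
(defun last-pair (x)
  (cond ((vm-null (vm-cdr x)) x)
        (t (last-pair (vm-cdr x)))))
\ecode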
\ssect Compilers.
Richard Gabriel has been working on compilers for Common Lisp, targeting
high-performance, stock-hardware computers---in this case the S-1 Mark IIA
supercomputer \ref:Brooks.1982a. \ref:Brooks.1982b. \ref:Brooks.1983..
Discussion with Prof. Martin Griss from the University of Utah on the
topic of portable compilers has led to interest in a joint
high-performance portable compiler project for Common Lisp between Stanford
and Utah.
It is hoped that this compiler would be targetable both to medium-performance
computers, such as those based on MC68000 microprocessors, and to high-performance
multi-processors, such as the S-1 Mark IIA.
\sect Automatic Construction of Special-purpose Programs.
Chris Goad has been working on the automatic construction of
special-purpose programs. In software development the aims of efficiency
and generality are often in conflict. It is common that specialized
programs, which take into account complex details of the situation in
which they are used, can be made much faster than cleaner, more general
programs. The conflict can be eased through the use of a kind of automatic
programming in which fast, specialized programs for a collection of related
tasks are constructed automatically. This style of automatic programming
enterprise---which may be referred to as special-purpose automatic
programming---differs in several respects from the better known variety in
which programs are automatically derived from logical specifications.
First, the primary aim of special-purpose automatic programming is
efficiency rather than replacement of human programming effort, although
there is of course substantial overlap between these aims. Second, special-purpose
automatic programming is far more tractable at the current stage
of knowledge than the other variety, since the class of programs to be
generated by any one synthesizer of special-purpose programs is
comparatively small. We propose to continue our current efforts in
special-purpose automatic programming, with emphasis on applications to
computer graphics and computer vision. The problems in graphics and
vision that will be attacked take the form of automating the generation
of programs that are specialized to the task of rapidly displaying---or
recognizing---particular three-dimensional objects.
In both computer graphics and computer vision a geometric computation is
undertaken which involves three pieces of information: (1) a physical
configuration C of objects in space, (2) a position and orientation P of
the viewer relative to C, and (3) a digital image I. In computer
graphics, I is computed from C and P; in computer vision, C and P are
computed from I. In many practical applications of both computer vision
and computer graphics, the configuration C is fixed in advance for any
particular series of computations; it is only P and I that vary within the
batch of computations. On the computer graphics side, an example is
provided by flight simulation, where a landscape is fixed in advance of
any particular simulated flight. The appearance of this fixed landscape
must be computed from a viewer position and orientation which changes
every thirtieth of a second. On the computer vision side, examples are
provided by the many industrial applications of automated vision where the
vision task takes the form of recognizing and locating a particular
three-dimensional object in a digitized image. The exact shape of the
object to be perceived is known in advance; the purpose of the act of
perception is only to determine its position and orientation in space
relative to the viewer. Again, C is fixed in advance, while P and I vary.
Problems of the kind just described can be attacked by use of special-purpose
automatic programming. What one does is to generate a special-purpose
program $P↓C$ for each configuration $C$ to be dealt with; this
special-purpose program is then used for each computation in which the
configuration $C$ appears. The advantage of this approach is speed.
Since the special-purpose program $P↓C$ has a very limited task to
perform---it is adapted to handling just one configuration---it can be
much faster than any general-purpose program would be for the same
configuration.
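The toy example below (ours; it is not the hidden surface elimination
algorithm) shows the shape of the specialization: a general geometric
test parameterized by the configuration, and the specialized program
obtained by unfolding it against one fixed configuration.
\bcode
;; General program: is POINT inside any of the axis-aligned boxes
;; in the configuration C?  A box is (xmin xmax ymin ymax); a
;; point is (x y).
(defun inside-any-p (point c)
  (cond ((null c) nil)
        ((inside-box-p point (car c)) t)
        (t (inside-any-p point (cdr c)))))

(defun inside-box-p (point box)
  (and (<= (first box) (first point) (second box))
       (<= (third box) (second point) (fourth box))))

;; Specialized program for the fixed configuration
;; C = ((0 1 0 1) (2 3 0 1)), roughly as it looks after unfolding
;; and simplification: the loop over C and the boxes themselves
;; have disappeared into straight-line code.
(defun inside-any-p-for-c (point)
  (let ((x (first point)) (y (second point)))
    (or (and (<= 0 x 1) (<= 0 y 1))
        (and (<= 2 x 3) (<= 0 y 1)))))
\ecode
The specialized version wins by doing at synthesis time the work that the
general version repeats at every call.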
Computer graphics and vision are good applications for special-purpose
automatic programming for several reasons. First, there is much to be
gained since speed is of central importance in many practical
applications. Second, the various applications within computer vision and
graphics have enough in common that fairly specific tools developed for
one application nonetheless carry over to others. Examples will be given
later in this proposal. Finally, automatic deduction plays a central role
in all varieties of automatic programming, including the special-purpose
variety. Each application of automatic programming gives rise to a
different class of propositions whose truth values need to be decided
automatically. The propositions which arise in graphics and vision
applications are for the most part simple geometric assertions. A well
developed technology for deciding geometric propositions exists---notably
in the form of decision procedures such as the simplex algorithm for
restricted classes of geometric problems. Applications of special-purpose
automatic programming to graphics and vision derive a substantial part of
their power from this existing technology for automatic geometric
deduction.
Goad started work on the application of
special-purpose automatic programming to computer graphics in early 1981;
work on applications to vision began in mid-1982.
As reported below, good results have been obtained for the hidden surface
elimination problem in computer graphics. Work on vision is
still underway, but the
preliminary results look very promising.
The same general scheme
has been employed for both applications, and there is also substantial overlap
in the specific machinery needed to implement the scheme.
In particular a central process in both applications is the automatic
design of an appropriate case analysis for the solution of the problem.
This case analysis design involves dividing up the class of possible
geometric configurations into subclasses within which regular behavior can
be obtained. In vision the restriction to regular behavior amounts to a
requirement that the visibility of features be determined and also that
the relationships among them depend in a smooth way on the position and
orientation parameters. In graphics regular behavior amounts to
the requirement that a single back-to-front ordering of
the surface elements to be displayed be valid within the case at
hand. In both graphics and vision, exhaustive forms of the case analysis
are obtained from general purpose algorithms, and their final, more
efficient form is obtained by a specialization process which depends
heavily on geometric automatic deduction. The role of automatic deduction
is essentially that of determining which sets of relevant geometric
conditions (relevant to regularity in the appropriate sense) can co-occur,
and which cannot.
Based on our experience so far, we think it very likely that a wide class
of related applications in graphics and vision can be attacked with a
comparatively small collection of tools for the automatic synthesis of
special-purpose programs. The tools will lie at two levels of generality.
First, there are absolutely general tools for program manipulation (such as
unfolding and pruning) which are completely independent of subject matter,
and, second, there are tools which apply to a variety of algorithmic
problems within a particular mathematical domain. The mathematical domain
which underlies the class of problems which we have chosen is
three-dimensional geometry, and the domain-specific (but not problem-specific)
tools that we use include tools for the systematic construction of
geometric case analyses and for geometric deduction. Three-dimensional
geometry is a good choice for this kind of work since, as a result of a
vast amount of previous effort, there are many existing tools and methods
to be exploited.
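As an illustration of the completely general level of tool (again ours,
and much simplified), the transformation below prunes conditional
expressions whose tests have become constant, which is exactly the
situation that arises once a general program has been unfolded against a
known configuration.
\bcode
;; Prune IF-expressions (represented as list structure) whose test
;; is the constant T or NIL, as arises after unfolding.
(defun prune (form)
  (cond ((atom form) form)
        ((and (eq (car form) 'if)
              (member (second form) '(t nil)))
         (prune (if (second form) (third form) (fourth form))))
        (t (mapcar (function prune) form))))

;; (prune '(if t (f x) (g x)))           evaluates to   (F X)
;; (prune '(h (if nil (f x) (g x)) y))   evaluates to   (H (G X) Y)
\ecode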
\ssect Accomplishments.
\ni 1. A program for synthesis of special-purpose hidden surface
elimination programs has
been implemented and tested on a large-scale example provided by the
Link-Singer Corporation. On this example, the generated program was 10 times
faster than the fastest and most widely used of the previously known algorithms.
This work is described in \ref:Goad.1982..
\ni 2. A synthesis method for special-purpose vision programs has been
designed and partly implemented. The special-purpose vision programs
which are generated have the task of recognizing and locating particular
objects in an image. The general scheme employed for object recognition
involves sequential matching of object features to image features. By
exploiting the advance information available about the object being
sought, very large speed-ups (perhaps as much as 100-fold) can be
obtained. The practical outcome is that the `high-level' aspects
(that is, the matching process) of model-based 3-dimensional vision can be made
fast enough for industrial applications---even when running on a
micro-processor.
\sect Epistemology and Formalization of Concurrent and Continuous Action.
Yoram Moses has been working on the epistemology and formalization of
concurrent and continuous action. A key barrier to progress in artificial
intelligence is that present systems deal mainly with discrete events that
result in a definite new situation rather than with events occurring
concurrently and continuously. This work bears some relation to concurrent
programming, but there are some definite differences. The synchronization
problem, crucial in the concurrent programming case, seems less central
here, whereas the handling of partial information and causality are more
important for an artificial intelligence program.
Hans Berliner's thesis presents a chess position that is a forced win for
white, but this fact is not straightforwardly computable in a `reasonable'
amount of time. Further, it appeared to McCarthy that the natural proof
of this fact involves an essentially concurrent argument. A proof has
been worked out in detail which uses a bottom-up approach and does not use
concurrent arguments. Thus this is no longer a valid example problem.
The examples that are being considered now involve real-world activities,
such as the decision-making needed to cross a street without being hit
by a car and to pass by a revolving sprinkler without getting wet.
Both of these problems require observation and prediction of continuous
events as well as the concurrent activity of the agent and the surrounding
environment.
Eventually it is hoped that a system could be built that would be capable
of guiding a robot in a competitive two-sided sport such as tennis. Of
course that is a long term goal, and the first steps in that direction
will be pursued.
In epistemology, some special cases of McCarthy's circumscription are
being analyzed, specifically the propositional case, and generalizations
and insight into circumscription are expected to result.
A paper is being prepared dealing with the representation and encoding of
relations by graphs. This paper will be presented at the SIAM Symposium on
the Applications of Discrete Mathematics held in June 1983.
\sect Proving Facts About Lisp.
Joseph Weening has been investigating the systematization of proving facts
about Lisp programs. In contrast to earlier work, an attempt is made
to consider the full range of Lisp programs in a dialect such as MacLisp
or Common Lisp. The `systematic' aspect will be to represent both
programs and proofs in a framework that can be manipulated by a computer.
The main reason for doing this is to allow verification, by the computer,
of the proofs which are presented.
Earlier work in this direction (cf. McCarthy and Cartwright, Boyer and
Moore) used a subset of Lisp, generally known as `pure Lisp,' which
consists of systems of recursive functions defined by conditional
expressions and primitive functions for numeric and list manipulation.
The aspects of Lisp that are not treated in this way include (1)
assignment statements, which change the values of variables; (2)
destructive operations, which change data structures; (3) sequential
programs; and (4) non-local control structures such as CATCH and THROW.
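The contrast can be illustrated as follows (our example): the first
definition lies in the pure subset treated by the earlier work, while the
second computes the same function using assignment and a sequential loop
and therefore falls outside it. A goal of the present work is that facts
such as the equivalence of the two be statable and provable in one
framework.
\bcode
;; Pure Lisp: a recursive function defined by a conditional
;; expression.
(defun sum-list (x)
  (cond ((null x) 0)
        (t (+ (car x) (sum-list (cdr x))))))

;; Outside the pure subset: assignment (SETQ) and a sequential loop.
(defun sum-list-loop (x)
  (let ((total 0))
    (do ((rest x (cdr rest)))
        ((null rest) total)
      (setq total (+ total (car rest))))))
\ecode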
\sect References.
\baselineskip 12pt
\makeref:Beeson.1980.{
Beeson, M., \article{Formalizing Constructive Mathematics: Why and How?} in
Richman, F.(ed.) \book{Constructive Mathematics, Proceedings New Mexico}, 1980
Lecture Notes in Mathematics 873, Springer-Verlag (1980) pp. 146--190.}
\makeref:Brooks.1981.{
Brooks, R. A., \article{Symbolic reasoning among 3D models and 2D images}, AI
Journal, vol 16, 1981.}
\makeref:Brooks.1982a.{Brooks, R. A., Gabriel, R. P., Steele, G. L.,
\article{An Optimizing Compiler for Lexically-Scoped Lisp},
SIGPLAN 82 Symposium on Compiler Construction, ACM,
June 23--25, 1982.}
\makeref:Brooks.1982b.{Brooks, R. A., Gabriel, R. P., Steele, G. L.,
\article{S-1 Common Lisp Implementation},
1982 ACM Symposium on Lisp and
Functional Programming, August 15--18 1982.}
\makeref:Brooks.1983.{Brooks, R. A., Gabriel, R. P., Steele, G. L.,
\article{Lisp-in-Lisp: High Performance},
submitted to the International Joint Conference on Artificial Intelligence,
August 1983.}
\makeref:Cartwright.1980.{
Cartwright, R., \article{A Constructive Alternative to Axiomatic Data Type
Definitions}, Proceedings LISP Conference, Stanford (1980).}
\makeref:Coppo.1980.{
Coppo, M., Dezani-Ciancaglini, M., Venneri, B.,
\article{Principal Type Schemes and Lambda Calculus Semantics}
in Seldin, J. P., Hindley, J. R. (eds.)
\book{To H. B. Curry: Essays on Combinatory Logic, Lambda Calculus and Formalism},
Academic Press (1980)
pp. 535--560.}
\makeref:Creary.1982.{
Creary, L. G.,\article{A Language of Thought for Reasoning About Actions},
(unpublished working
paper on NFLT, June, 1982).}
\makeref:Creary.1983.{
Creary, L. G.,\article{On the Epistemology of Commonsense Factual Reasoning:
Toward an Improved
Theoretical Basis for Reasoning Programs}, (forthcoming Stanford CS Report).}
\makeref:Feferman.1974.{
Feferman, S.,
\article{A Language and Axioms for Explicit Mathematics}
in Dold, A. and Eckman, B. (eds.) \book{Algebra and Logic},
Lecture Notes in Mathematics 450, Springer-Verlag (1974)
pp. 87--139.}
\makeref:Feferman.1977.{
Feferman, S.
\article{Theories of Finite Type Related to Mathematical Practice}
in Barwise, J. (ed.)
\book{Handbook of Mathematical Logic},
North-Holland
pp. 913--971.}
\makeref:Feferman.1979.{
Feferman, S.,
\article{Constructive Theories of Functions and Classes}
in Boffa, M., van Dalen, D., and McAloon, K. (eds.)
\book{Logic Colloquium 78}, University of Mons, Belgium (August 1978)
North-Holland (1979)
pp. 159--224.}
\makeref:Feferman.1981.{
Feferman, S.,
\article{Theories of Variable Finite Type},
(draft).}
\makeref:Feferman.1982.{
Feferman, S.,
\article{Inductively Presented Systems and the Formalization of Meta-Mathematics}
in van Dalen, D., Lascar, D., and Smiley, J. (eds.)
\book{Logic Colloquium 1980},
North Holland 1982
pp. 95--128.}
\makeref:Fenstad.1980.{
Fenstad, J. E.,
\article{General Recursion Theory: An Axiomatic Approach}
Springer-Verlag.}
\makeref:Gabriel.1982.{
Gabriel, R. P., Masinter, L. M.,
\article{Performance of Lisp Systems},
Proceedings of the 1982 ACM Symposium on Lisp and Functional
Programming, August 1982.}
\makeref:Gabriel.1983.{
Gabriel, R. P., Griss, M. L.,
\article{Lisp on Vax: A Case Study},
in preparation.}
\makeref:Goad.1982.{
Goad, C. A., \article{Automatic construction of special purpose programs for
hidden surface elimination}, Computer Graphics, vol. 16 No. 3, July 1982,
pp. 167--178.}
\makeref:G\"odel.1958.{
G\"odel, K.,
\article{On a Hitherto Unutilized Extension of the Finitary Standpoint},
Dialectica 12 (1958).}
\makeref:Hindley.1969.{
Hindley, R.,
\article{The Principal Type-Scheme of an Object in Combinatory Logic},
TAMS 146
pp. 29--60.}
\makeref:Ketonen.1982.{Ketonen, J., Weening, J.,
\article{EKL---An Interactive Proof Checker}, user's reference manual,
in circulation.}
\makeref:Ketonen.1982.{Ketonen, J., Weyhrauch, R.,
\article{A Decidable Fragment of Predicate Calculus}, submitted for
publication.}
\makeref:Kleene.1952.{
Kleene, S. C.,
\article{Introduction to Metamathematics},
Van Nostrand.}
\makeref:Manna.1969.{
Manna, Z.,
\article{The Correctness of Programs},
J Computer and System Sciences 3,
pp. 119--127.}
\makeref:Manna.1978.{
Manna, Z.,
\article{Six Lectures on The Logic of Computer Programming},
SIAM CBMS-NSF Regional Conference Series in Applied Mathematics No. 31 (1980).}
\makeref:Martin-L\"of.1979.{
Martin-L\"of, P.,
\article{Constructive Mathematics and Computer Programming},
International Congress of Logic, Methodology and Philosophy of Science
(to appear).}
\makeref:McCarthy.1959.{
McCarthy, J., \article{Programs with Common Sense},
Proc. Int. Conf. on Mechanisation of Thought Processes, Teddington, England,
National Physical Laboratory, (1959).}
\makeref:McCarthy.1963.{
McCarthy, J.,
\article{A Basis for a Mathematical Theory of Computation}
in Braffort, P. and Herschberg, D. (eds.), \book{Computer Programming and
Formal Systems},
North-Holland (1963)
pp. 33--70.}
\makeref:McCarthy.1982.{
McCarthy, John, \article{Coloring Maps and the Kowalski Doctrine},
Stanford CS Report 82-903 (April 1982).}
\makeref:Milner.1978.{
Milner, R.,
\article{A Theory of Type Polymorphism in Programming},
J Comp.Sys.Sci 17,
pp. 302--321.}
\makeref:Moschovakis.1969.{
Moschovakis, Y. N.,
\article{Abstract First Order Computability I, II},
Trans.Am.Math.Soc. 138 (1969),
pp. 427--464, 465--504.}
\makeref:Nordstr\"om.1981.{
Nordstr\"om, B.,
\article{Programming in Constructive Set Theory: Some Examples},
Proc. ACM Conference on Functional Programming Languages and
Computer Architecture (October 1981),
pp. 141--153.}
\makeref:Platek.1966.{
Platek, Richard A.,
\article{Foundations of Recursion Theory},
PhD Thesis, Stanford University, 1966.}
\makeref:Scott.1975.{
Scott, D.,
\article{Data Types as Lattices},
SIAM J. Computing 5(1976)
pp. 522--587.}
\makeref:Scott.1977.{
Scott, D.,
\article{Identity and Existence in Intuitionistic Logic}
in Fourman, M. P., Mulvey, C. J., and Scott, D. (eds.),
\book{Applications of Sheaves, Proceedings Durham 1977},
Lecture Notes in Mathematics 753, Springer-Verlag (1979)
pp. 660--696.}
\makeref:Steele.1975.{
Steele, G. L., Sussman, G. J.,
\article{SCHEME: an Interpreter for Extended Lambda Calculus},
MIT AI Memo 349 (December 1975).}
\makeref:Tennant.1975.{
Tennant, N.,
\article{Natural Deduction for Restricted Quantification Identity and Descriptions},
5th International Congress of Logic, Methodology and Philosophy of Science (1975)
pp. I--51.}
\makeref:Thatcher.1979.{ Thatcher, J. W., Wagner, E. G. and Wright, J. B.,
\article{Data Type Specification: Parameterization and the Power of
Specification Techniques}, IBM Research report RC-7757 (1979).}
\vfill\eject
\sect Publications.
\baselineskip 12pt
\makeref:Creary.1981.{
Creary, L. G.
\article{Causal Explanation and the Reality of Natural Component Forces},
Pacific Philosophical Quarterly 62, 1981, pp. 148--157.}
\makeref:Creary.1982.{
Creary, L. G.
\article{A Language of Thought for Reasoning About Actions}, (unpublished working
paper on NFLT, June, 1982).}
\makeref:Creary.1983.{
Creary, L. G.
\article{On the Epistemology of Commonsense Factual Reasoning: Toward an Improved
Theoretical Basis for Reasoning Programs}, (forthcoming Stanford CS Report).}
\makeref:Gabriel.1982.{
Gabriel, R. P., Masinter, L. M.,
\article{Performance of Lisp Systems},
Proceedings of the 1982 ACM Symposium on Lisp and Functional
Programming, August 1982.}
\makeref:Gabriel.1983.{
Gabriel, R. P., Griss, M. L.,
\article{Lisp on Vax: A Case Study},
in preparation.}
\makeref:Goad.1982.{
Goad, C. A., \article{Automatic construction of special purpose programs for
hidden surface elimination}, Computer Graphics, vol. 16 No. 3, July 1982,
pp. 167--178.}
\makeref:Ketonen.1982.{Ketonen, J., Weening, J.,
\article{EKL---An Interactive Proof Checker}, user's reference manual,
in circulation.}
\makeref:Ketonen.1982.{Ketonen, J., Weyhrauch, R.,
\article{A Decidable Fragment of Predicate Calculus}, submitted for
publication.}
\makeref:McCarthy.1980.{
McCarthy, John, \article{Circumscription --- A Form of Non-Monotonic Reasoning},
Artificial Intelligence, Volume 13, Numbers 1--2, April 1980.}
\makeref:McCarthy.1980.{
McCarthy, John and Talcott, Carolyn,
\article{LISP --- Programming and Proving}, course notes, Stanford University.
(to be published as a book).}
\makeref:McCarthy.1982.{
McCarthy, John, \article{Coloring Maps and the Kowalski Doctrine}, Stanford
CS Report 82-903 (April 1982).}
\vfill\end